import numpy as np
import matplotlib.pyplot as plt
import pathlib
import os
import importlib
from scipy import signal
from scipy import stats
from gwpy import timeseries
from gwosc import datasets
import h5py
import funcs
from pathlib import Path
# Larger default matplotlib font sizes so figure labels stay readable.
for _param, _size in [('axes.labelsize', 14),
                      ('xtick.labelsize', 12),
                      ('ytick.labelsize', 12),
                      ('axes.titlesize', 16)]:
    plt.rcParams[_param] = _size
# Analysis configuration: event, detector, and sampling rate.
event_name = 'GW170817'
detector_name = 'H'
fs = 2**12  # sampling frequency [Hz]

# Load the strain time-series from the pre-downloaded .npz archive.
data_folder = Path('data_files')
data_file_name = f'{detector_name}_{event_name}_{fs}_Hz.npz'
with np.load(data_folder / data_file_name) as npz_file:
    strain = npz_file['strain']
    times = npz_file['times']

# Time step and the matching one-sided (rfft) frequency grid.
dt = times[1] - times[0]
freqs = np.fft.rfftfreq(len(strain), d=dt)
df = freqs[1] - freqs[0]

np.save(data_folder / 'strain', strain)
Load the time domain data and Fourier transform it.
# Taper the segment edges with a Tukey window (alpha=0.1 -> 10% of samples
# tapered) before the FFT, to suppress spectral leakage.
# BUG FIX: scipy.signal.tukey was deprecated and removed in scipy 1.13;
# the window lives in scipy.signal.windows.
tukey_window = signal.windows.tukey(M=len(strain), alpha=0.1)
strain_f = np.fft.rfft(strain * tukey_window)
# presenting the time domain signal after the Tukey window
# NOT ASKED FOR IN THE EXAM
plt.plot(times, tukey_window * strain)
[<matplotlib.lines.Line2D at 0x16a7819f0>]
Estimate the ASD and create the whitening filter. Plot a log-log plot of the ASD from 20Hz onward.
# Estimate the one-sided PSD with Welch's method: 64-s segments, 50% overlap,
# median averaging (robust against loud glitches), density scaling.
welch_dict = {'x':strain, 'fs':fs, 'nperseg': int(64*fs), 'noverlap':int(32*fs),
              'average': 'median', 'scaling': 'density'}
psd_freqs, psd_estimation = signal.welch(**welch_dict)
fmin = 20  # [Hz] lower edge of the analysis band
# ASD = sqrt(PSD), interpolated onto the full rfft frequency grid.
asd = np.interp(freqs, psd_freqs, psd_estimation**(1/2))
i = np.searchsorted(psd_freqs, fmin)
# BUG FIX: the plot is titled 'ASD' and the task asks for the ASD, but the
# original plotted the PSD (no square root taken).
plt.loglog(psd_freqs[i:], psd_estimation[i:]**(1/2))
plt.title('ASD')
Text(0.5, 1.0, 'ASD')
# Build the whitening filter in the frequency domain.
# High-pass: zero below fmin, smooth sin^2 ramp over [fmin, fmin+1] Hz, 1 above.
highpass_filter = np.zeros(len(freqs))
i1,i2 = np.searchsorted(freqs, (fmin, fmin+1))
highpass_filter[i1:i2] = np.sin(np.linspace(0, np.pi/2, i2-i1))**2
highpass_filter[i2:] = 1.0
# Raw whitening filter: 1/ASD within the passband.
whitening_filter_raw = highpass_filter / np.interp(x=freqs, xp=psd_freqs, fp=psd_estimation**(1/2))
# Truncate the filter's impulse response to 64 s with a padded Tukey window so
# it is localized in time (limits wrap-around when applied circularly via FFT).
# BUG FIX: scipy.signal.tukey was removed in scipy 1.13 -> signal.windows.tukey.
padded_tukey_window = np.pad(signal.windows.tukey(M=int(64*fs), alpha=0.1),
                             pad_width=(len(strain)-int(64*fs))//2,
                             constant_values = 0)
# fftshift centers the impulse response, the window truncates it, and the
# second fftshift moves it back before transforming to the frequency domain.
whitening_filter = highpass_filter * np.fft.rfft(
    np.fft.fftshift(padded_tukey_window *
                    np.fft.fftshift(np.fft.irfft(whitening_filter_raw)))).real * np.sqrt(2 * dt)
# Plot the filter magnitude over the analysis band.
plt.loglog(freqs[i1:], np.abs(whitening_filter[i1:]))
plt.ylim(1e18)
plt.title('whitening filter')
plt.xlabel('freq [Hz]')
plt.ylabel('whitening filter')
Text(0, 0.5, 'whitening filter')
# Apply the whitening filter in the frequency domain and show the result.
whitened_strain = np.fft.irfft(strain_f * whitening_filter)
plt.plot(times, whitened_strain)
plt.title('Whitened strain')
plt.xlabel('time')
Text(0.5, 0, 'time')
Create a single template for a search, with arbitrarily selected masses of $m_1 = 1.5$ and $m_2 = 1.25$ (in solar masses). Plot the time-domain template, such that it is localized in the middle of the time-axis. Fix the plot such that the waveform features are visible.
# Project-local helper module with the exam's GW-search functions; reload so
# edits to the module are picked up without restarting the kernel.
import gw_search_functions_SOLVED as gw_search_functions
importlib.reload(gw_search_functions)
<module 'gw_search_functions_SOLVED' from '/Users/jonatahm/Dropbox (Weizmann Institute)/Jonathan Mushkin’s files/Tutoring/Statistics, Algorithms and Experiment Design - Spring 2023/HW/Home Exam/gw_search_functions_SOLVED.py'>
# Template masses (solar masses), arbitrarily chosen within the search range.
m1 = 1.5
m2 = 1.25
# phases and amplitude are not calculated below 20 Hz
phase = np.zeros_like(freqs)
phase[i1:] = gw_search_functions.masses_to_phases(m1,m2, freqs[i1:])
amp = np.zeros_like(freqs)
# leading-order inspiral amplitude ~ f^(-7/6)
amp[i1:] = freqs[i1:]**(-7/6)
h = amp * np.exp(1j*phase)
# Normalize so the whitened template has unit norm: the zero-lag value of
# irfft(|h*W|^2) is the template's self-overlap.
normalization = np.fft.irfft(np.abs(h*whitening_filter)**2)[0]**(1/2)
h /= normalization
# fftshift moves the (circularly localized) template to the middle of the axis.
plt.plot(times, np.fft.fftshift(np.fft.irfft(h)))
plt.xlim(850, 1030)
plt.xlabel('time')
plt.ylabel('h')
Text(0, 0.5, 'h')
Generate the complex-overlap time-series. Plot a histogram with the real and imaginary parts of the complex-overlap, at a segment of data without an obvious glitch. Overlay the theoretical predictions.
# Whitened strain in the time domain (conjugate filter application).
whitened_strain_t = np.fft.irfft(strain_f * whitening_filter.conj())
# zoom on glitch, to see where not to use the overlap timeseries
plt.plot(times, whitened_strain_t)
plt.xlim(1250, 1260)
(1250.0, 1260.0)
# Complex overlap time-series between whitened data and whitened template:
# z_cos = overlap with the cosine phase, z_sin = overlap with the sine phase.
z_cos = np.fft.irfft((strain_f * whitening_filter) * (h * whitening_filter).conj())
z_sin = np.fft.irfft((strain_f * whitening_filter) * (h * whitening_filter).conj() * 1j)
z = z_cos +1j * z_sin
# SNR^2(t) = |z(t)|^2; in Gaussian noise it should follow chi^2 with 2 d.o.f.
snr2 = np.abs(z)**2
# indices without a glitch
t_start = 200
t_end = 1000
tslice = slice(*np.searchsorted(times, (t_start, t_end)))
# keywords for the histogram
hist_kwargs = {'bins':200,
               'density':True,
               'log':True,
               'histtype':'step'}
# create 2 histograms
counts, edges, patches = plt.hist(z_cos[tslice], **hist_kwargs, label='z_cos')
counts, edges, patches = plt.hist(z_sin[tslice], **hist_kwargs, label='z_sin')
# overlay normal distribution with mu=0 and sigma=1
plt.plot(edges, stats.norm().pdf(edges), label='normal distribution')
plt.legend(loc='lower center')
<matplotlib.legend.Legend at 0x17fb0fc10>
Create the SNR2 time series.
To verify your last result, use the estimated ASD to draw mock data without a GW transient. Create the same SNR2 time-series on this data. On the same figure, plot the histograms of the SNR2 of the real data and of the mock data. Overlay the theoretical prediction.
# split the variance equally between the real and imaginary components of strain_f
mock_strain_f = [1,1j]@ stats.norm(scale=asd/np.sqrt(2)).rvs(size=(2, len(freqs)))
# multiply by a factor that relates to the duration
# NOTE(review): 64 is presumably the Welch segment length in seconds, so this
# rescales the segment-normalized ASD to the full record length -- confirm.
mock_strain_f *= len(times) / 64
# Compare the spectra of real vs mock data by eye.
plt.semilogy(freqs, np.abs(strain_f), alpha=0.5)
plt.semilogy(freqs, np.abs(mock_strain_f), alpha=0.5)
plt.ylabel(r'$|{\rm strain}(f)|$')
plt.xlabel('frequency [Hz]')
Text(0.5, 0, 'frequency [Hz]')
# calculate the SNR^2 for the mock data (same matched filter as the real data)
mock_snr2 = np.abs(
    np.fft.irfft((mock_strain_f * whitening_filter) * (h * whitening_filter).conj())
    +1j * np.fft.irfft((mock_strain_f * whitening_filter) * (h * whitening_filter).conj() * 1j)
    )**2
hist_kwargs = {'histtype': 'step',
               'density': True,
               'log': True,
               'bins':range(200)}
# Histogram real vs mock SNR^2; both should follow chi^2(2) in pure noise.
counts, edges, patches = plt.hist(snr2, **hist_kwargs, label=r'real data SNR$^2$')
# BUG FIX: 'pathes' typo renamed; also the chi^2 label below is now a raw
# string ('\c' is an invalid escape sequence and warns on modern Python).
counts, edges, patches = plt.hist(mock_snr2, **hist_kwargs, label='mock data SNR$^2$')
plt.plot(edges, stats.chi2(df=2).pdf(edges), label=r'$\chi^2(2)$ pdf')
# focus on interesting portion of the histogram
plt.xlim(0, 100)
# lower y-limit: one tenth of a single count's density
plt.ylim(1/np.diff(edges).mean()/len(snr2)/10)
(1.1920928955078126e-08, 72.37441257409046)
Create a test-statistic to detect glitches. Use it to remove glitches from the SNR2 timeseries. Plot the cleaned SNR2 timeseries histogram. Overlay the theoretical prediction.
# find f_bar = where the cumulative SNR2 is equal half the overall SNR2
frac_snr2 = np.cumsum(np.abs(h*whitening_filter)**2)
frac_snr2 /= frac_snr2[-1]
i_fbar = np.searchsorted(frac_snr2, 0.5)
# create the low- and high-frequency templates
h_low, h_high = np.zeros((2, len(freqs)), dtype=complex)
# normalize them so each has norm 1
# (each half carries 1/2 of the total SNR^2, hence the sqrt(2) factor)
h_low[:i_fbar] = h[:i_fbar] * np.sqrt(2)
h_high[i_fbar:] = h[i_fbar:] * np.sqrt(2)
# check their normalization
print(np.fft.irfft((h * whitening_filter) * (h * whitening_filter).conj())[0]**(1/2))
print(np.fft.irfft((h_low * whitening_filter) * (h_low * whitening_filter).conj())[0]**(1/2))
print(np.fft.irfft((h_high * whitening_filter) * (h_high * whitening_filter).conj())[0]**(1/2))
0.9999999999999999 0.9999988070139046 1.000001192984672
# Complex overlap time-series for the low- and high-band templates.
z_low, z_high = [(np.fft.irfft(strain_f * whitening_filter * (x*whitening_filter).conj())
                  + 1j * np.fft.irfft(strain_f * whitening_filter * (x*whitening_filter).conj() * 1j))
                 for x in [h_low, h_high]]
# create 2 scatter plots of z_low-z_high (real vs imaginary) around and not around a glitch.
# NOT ASKED FOR IN THE EXAM
fig, axs = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
tslice = slice(*np.searchsorted(times, (100, 101)))
axs[0].scatter((z_low-z_high)[tslice].real, (z_low-z_high)[tslice].imag, s=1, alpha=0.5)
axs[0].set_title('not around glitch')
tslice = slice(*np.searchsorted(times, (1258, 1259)))
axs[1].scatter((z_low-z_high)[tslice].real, (z_low-z_high)[tslice].imag, s=1, alpha=0.5)
axs[1].set_title('around glitch')
for ax in axs:
    ax.set_xlabel(r'$z_\cos - z_\sin$ (real)')
    ax.set_ylabel(r'$z_\cos - z_\sin$ (imaginary)')
fig.tight_layout()
# Glitch test statistic: for a genuine signal the low- and high-band overlaps
# agree (z_low ~ z_high), so 0.5*|z_low - z_high|^2 follows chi^2(2);
# glitches concentrate power in one band and yield large values.
glitch_test_statistic = 0.5 * np.abs(z_low - z_high)**2
glitch_test_threshold = stats.chi2(df=2).isf(0.01) # throw away one in a 100 good signals
# veto only samples that are both glitch-like and loud (SNR > 5)
glitch_mask = (glitch_test_statistic > glitch_test_threshold) * (snr2 > 5**2)
hist_kwargs = {'histtype': 'step',
               'density': True,
               'log': True,
               'bins': range(200)}
counts, edges, patches = plt.hist(snr2,**hist_kwargs, label=r'SNR$^2$ before glitch-vetoing')
counts, edges, patches = plt.hist(snr2[~glitch_mask],**hist_kwargs, label=r'SNR$^2$ after glitch vetoing')
plt.plot(edges, stats.chi2(df=2).pdf(edges))
# lower y-limit: half of a single count's density, so empty bins fall off-scale
y_lower_limit = 0.5 / (np.diff(edges).mean() *len(snr2))
plt.xlim(right=100)
plt.ylim(y_lower_limit)
plt.legend()
<matplotlib.legend.Legend at 0x16be994e0>
Now you know how to conduct a search with a single template. We now go on to prepare a bank of templates. The first step is to find a good linear basis to work with. Draw $2^8$ mass samples (see given functions). Create the waveform for each and perform SVD to find their phase basis-vectors. Choose the number of basis-vectors you want to use. Plot the basis-vectors against frequency.
# Reload the helper module to pick up any edits.
importlib.reload(gw_search_functions)
<module 'gw_search_functions_SOLVED' from '/Users/jonatahm/Dropbox (Weizmann Institute)/Jonathan Mushkin’s files/Tutoring/Statistics, Algorithms and Experiment Design - Spring 2023/HW/Home Exam/gw_search_functions_SOLVED.py'>
# Inspect the chirp-mass range the helper samples from.
gw_search_functions.MCHIRP_RANGE
(1.15, 1.2)
# Draw 2^8 mass samples and build a low-dimensional phase basis via SVD.
# NOTE(review): `qmc_sequence` is never used -- the samples come from the
# helper on the next line.
qmc_sequence = stats.qmc.Halton(d=2).random(2**8)
m1,m2 = gw_search_functions.draw_mass_samples(2**8)
# coarse frequency grid: analysis band, decimated by 128 to keep the SVD cheap
fslice = slice(np.searchsorted(freqs, (fmin)), len(freqs), 128)
fs = freqs[fslice]  # NOTE(review): shadows the sampling rate `fs` defined above
phases = gw_search_functions.masses_to_phases(m1,m2, fs)
# whitened amplitude on the coarse grid, normalized to unit norm;
# used as the weight for all phase inner products
wht_amp = (amp * whitening_filter)[fslice]
wht_amp = wht_amp / np.sqrt(np.sum(wht_amp**2))
# remove each phase's best-fit linear part (equivalent to a time shift plus a
# constant phase, which the matched filter handles separately)
linear_free_phass = gw_search_functions.phases_to_linear_free_phases(phases, freqs, weights=wht_amp)
plt.plot(fs, linear_free_phass.T);
# subtract the mean evolution; the SVD then captures deviations from it
common_phase_evolution = linear_free_phass.mean(axis=0)
phases_without_common_evolution = linear_free_phass - common_phase_evolution
svd_phase = phases_without_common_evolution
svd_weights = wht_amp
# this part takes a few minutes
u,d,v = np.linalg.svd(svd_phase * svd_weights)
# singular values fall off fast: a couple of basis vectors suffice
plt.semilogy(d[:20], '.')
[<matplotlib.lines.Line2D at 0x3ff561c60>]
# check that eigenvectors have zero weighted mean, i.e. they are orthogonal to a constant function
(v[0]*svd_weights).mean(), (v[1]*svd_weights).mean()
(-1.5762437188525012e-05, -9.970250324066757e-06)
import pickle  # NOTE(review): unused in the visible code
# Persist the SVD products, then round-trip through disk (allows restarting
# from here without redoing the SVD).
np.savez('outputs/GW170817_H_svd', u=u, d=d, v=v, freqs_sliced=fs,freqs=freqs)
u,d,v = [np.load('outputs/GW170817_H_svd.npz').get(k) for k in ('u','d','v')]
# pick 2 coordinates (keep only the leading 2 singular vectors)
ndim = 2
u = u[:, :ndim]
d = d[:ndim]
v = v[:ndim, :]
# create a phase vector (without weights) from SVD components
# and a new set of coordinates
coordinates = u * d
phase_basis_coarse_freqs = np.zeros_like(v)
# divide out the weights only where they are non-zero (avoid 0/0)
mask = svd_weights!=0
phase_basis_coarse_freqs[:, mask] = v[:, mask]/svd_weights[mask]
# Samples in the new 2D coordinate space.
plt.scatter(*coordinates.T,s=2.5,marker='.',alpha=1)
<matplotlib.collections.PathCollection at 0x3ff609810>
# Sanity checks: weight normalization, weighted norms and weighted means of
# the basis vectors.
print(np.sum(svd_weights**2))
print(np.sum(svd_weights**2 * phase_basis_coarse_freqs[0]**2))
print(np.sum(svd_weights**2 * phase_basis_coarse_freqs[1]))
print(np.sum(svd_weights**2 * phase_basis_coarse_freqs[0]))
print(np.sum(svd_weights**2 * phase_basis_coarse_freqs[0]**2))
1.0000000000000002 0.9999999999999972 -0.32352465276564224 -0.5114753243304481 0.9999999999999972
# The two retained phase basis vectors on the coarse grid.
plt.plot(phase_basis_coarse_freqs[0])
plt.plot(phase_basis_coarse_freqs[1])
[<matplotlib.lines.Line2D at 0x3ff652e60>]
# Interpolate the basis vectors from the coarse grid onto the full rfft grid
# (zero below the band via left=0).
phase_basis = np.array(
    [np.interp(x=freqs, xp=freqs[fslice],
               fp=phase_base, left=0)
     for phase_base in phase_basis_coarse_freqs])
plt.plot(freqs, phase_basis.T)
[<matplotlib.lines.Line2D at 0x3ff633cd0>, <matplotlib.lines.Line2D at 0x3ff632e90>]
# check (approximate) orthonormality of the interpolated basis under the
# whitened-template weight
print(np.sum(phase_basis[0]**2 * np.abs(h*whitening_filter)**2) / len(freqs))
print(np.sum(phase_basis[0] * phase_basis[1]
             *np.abs(h*whitening_filter)**2) / len(freqs))
print(np.sum(phase_basis[1] * phase_basis[1]
             *np.abs(h*whitening_filter)**2) / len(freqs))
1.0006791075690373 0.001434266405347329 1.0009271862660045
Calculate the inner product between waveforms at different coordinate-distance $\sqrt{\sum_\alpha |\Delta c_\alpha|^2}$. Plot their overlap against their distance, and the theoretical prediction.
# coordinate distances to test, log-spaced
distances = 10**np.linspace(-2, 0.5 , num=30)
# rebuild the normalized whitened amplitude on the coarse grid
amp = np.zeros_like(fs)
cond = whitening_filter[fslice]!=0
amp[cond] = freqs[fslice][cond]**(-7/6)
amp_wht = (amp * whitening_filter[fslice])
amp_wht /= np.sqrt(np.sum((amp_wht)**2))
matches =np.zeros_like(distances)
for i, distance in enumerate(distances):
    # split the displacement equally between the two coordinates
    dist_per_coordinate = distance/np.sqrt(2)
    coordinate = np.array([dist_per_coordinate, dist_per_coordinate])
    phase = coordinate @ phase_basis_coarse_freqs + common_phase_evolution
    # match = weighted real overlap of the displaced and reference waveforms
    matches[i] = np.sum(amp_wht*np.exp(-1j*phase) * amp_wht*np.exp(+1j*common_phase_evolution)).real
# mismatch vs the quadratic prediction: 1 - match ~ distance^2 / 2
plt.loglog(distances**2, 1-matches, label='$1 - $ match')
plt.loglog(distances**2, distances**2/2, '.', label=r'$1-\frac{1}{2}\sum_\alpha|\Delta c_\alpha |^2$')
plt.xlabel(r'distance = $\sum_\alpha |\Delta c_\alpha |^2$')
plt.ylabel('mismatch = $1 - $match')
plt.legend(loc='upper left')
<matplotlib.legend.Legend at 0x44ee9dab0>
Draw $2^{13}$ mass samples. Create the phase for each, and find the coordinates of each. Select a subset such that the distance between any 2 samples is not smaller than 0.1. On the same plot, create a scatter plot of the $2^{13}$ samples and of the selected subset. On the plot, write down the size of the subset. This subset defines the search bank.
# re-define the coarse frequency grid (same as used to build the basis)
fslice = slice(np.searchsorted(freqs, (fmin)), len(freqs), 128)
fs = freqs[fslice]
# draw the full 2^13 mass samples for the bank
m1, m2 = gw_search_functions.draw_mass_samples(2**13)
plt.scatter(m1,m2,marker='.',s=1)
# mark a reference (m1, m2) point on top of the sample cloud
plt.scatter( 1.46 * (1+0.01) , 1.27*(1+0.01),s=5, marker='x')
<matplotlib.collections.PathCollection at 0x44f35c520>
# Compute the phase for each sample on the coarse grid and project onto the
# SVD basis to obtain each sample's 2D coordinates.
phases_on_coarse_freqs = gw_search_functions.masses_to_phases(m1, m2, fs)
# BUG FIX: the original passed phase_basis_coarse_freqs (the basis itself) as
# the phases and then discarded the result; the sample phases must receive the
# same linear-free treatment used when building the basis (see the 2^8 cell).
linear_free_phass = gw_search_functions.phases_to_linear_free_phases(phases_on_coarse_freqs, fs, amp_wht)
phases_without_common_evolution = linear_free_phass - common_phase_evolution
# weighted projection onto the basis vectors
coordinates = (svd_weights**2 * phases_without_common_evolution) @ phase_basis_coarse_freqs.T
# thin the samples so no two bank points are closer than sqrt(0.1)
bank_coordinates, bank_indices = gw_search_functions.select_points_without_clutter(coordinates, np.sqrt(0.1))
plt.scatter(*coordinates.T, s=1, alpha=0.5, c='r')
plt.scatter(*bank_coordinates.T, s=5, c='k')
print(bank_coordinates.shape)
(156, 2)
# rebuild the unit-norm template amplitude on the full frequency grid
amp = np.zeros_like(freqs)
amp[i1:] = freqs[i1:]**(-7/6)
normalization = gw_search_functions.correlate(amp, amp, w=whitening_filter)[0]**(1/2)
amp /= normalization
# per-template outputs of the search loop below
indices_lists = []
snr2_lists = []
# threshold of ~one background crossing per 0.1-s cell
# NOTE(review): min_snr2_to_save is not used in the visible code
min_snr2_to_save = stats.chi2(df=2).isf(1/(times[-1]/0.1))
glitch_test_threshold = stats.chi2(df=2).isf(0.01)
snr2_lists_raw = []
indices_lists_raw = []
glitch_mask_list = []
# common phase evolution interpolated back onto the full grid
common_phase_evolution_high_res = np.interp(x=freqs, xp=fs, fp=common_phase_evolution)
fs = 1/dt  # restore `fs` to the sampling rate (it held the coarse grid above)
import time  # NOTE(review): mid-file import kept as-is (notebook cell)
t_start = time.time()
# Matched-filter search: for each bank template, compute the SNR^2
# time-series, apply the glitch veto, and keep per-0.1-s maxima.
for template_index, template_coordinate in enumerate(bank_coordinates):
    # reconstruct the template phase from its bank coordinates
    phase = common_phase_evolution_high_res + template_coordinate @ phase_basis
    h = amp * np.exp(1j*phase)
    snr2 = gw_search_functions.snr2_timeseries(h*whitening_filter,
                                               strain_f * whitening_filter)
    # band-split templates for the glitch test (as in the single-template cell)
    h_low, h_high = np.zeros((2, len(freqs)), complex)
    h_low[:i_fbar] = h[:i_fbar]
    h_high[i_fbar:] = h[i_fbar:]
    z_low = gw_search_functions.complex_overlap_timeseries(h_low * whitening_filter,
                                                           strain_f * whitening_filter)
    z_high = gw_search_functions.complex_overlap_timeseries(h_high * whitening_filter,
                                                            strain_f * whitening_filter)
    glitch_test_statistic = np.abs(z_low - z_high)**2
    # NOTE(review): snr2 > 10 differs from the single-template cell (snr2 > 25)
    # -- confirm which threshold is intended
    glitch_mask = (glitch_test_statistic > glitch_test_threshold) * (snr2 > 10)
    glitch_mask_list.append(glitch_mask)
    # maxima (and sample indices) over consecutive 0.1-s windows,
    # with and without the glitch veto
    maxs, argmaxs = gw_search_functions.max_argmax_over_n_samples(
        snr2* ~glitch_mask, int(0.1*fs))
    indices_lists.append(argmaxs)
    snr2_lists.append(maxs)
    maxs, argmaxs = gw_search_functions.max_argmax_over_n_samples(snr2, int(0.1*fs))
    indices_lists_raw.append(argmaxs)
    snr2_lists_raw.append(maxs)
    t = time.time()
    # progress printout: every template for the first 10, then every 10th
    if (template_index< 10) or (template_index % 10 ==0):
        print(f'{template_index}: time passed {t-t_start:.3g} seconds')
0: time passed 1.85 seconds 1: time passed 2.83 seconds 2: time passed 3.77 seconds 3: time passed 4.69 seconds 4: time passed 5.6 seconds 5: time passed 6.52 seconds 6: time passed 7.43 seconds 7: time passed 8.34 seconds 8: time passed 9.26 seconds 9: time passed 10.2 seconds 10: time passed 11.1 seconds 20: time passed 20.6 seconds 30: time passed 30 seconds 40: time passed 39.6 seconds 50: time passed 49.4 seconds 60: time passed 59.4 seconds 70: time passed 70.3 seconds 80: time passed 80.1 seconds 90: time passed 90.5 seconds 100: time passed 101 seconds 110: time passed 110 seconds 120: time passed 120 seconds 130: time passed 130 seconds 140: time passed 139 seconds 150: time passed 149 seconds
# stack the per-template results into (N_templates, N_windows) arrays
snr2_per_template = np.array(snr2_lists)
time_indices_per_template = np.array(indices_lists)
snr2_per_template_with_glitches = np.array(snr2_lists_raw)
# persist the search products for later post-processing
np.savez('outputs/GW170817_search', indices = np.array(indices_lists), snr2=np.array(snr2_lists), bank_coordinates =bank_coordinates,
         phase_basis_coarse_freqs = phase_basis_coarse_freqs, freqs_coarse_grid = freqs[fslice], freqs=freqs, whitening_filter=whitening_filter)
# Histograms of all per-window maxima, raw vs glitch-vetoed (shared bins).
hist_kwargs = {'histtype':'step',
               'density':False,
               'log':True,
               'bins':200}
counts, edges, patches = plt.hist(snr2_per_template_with_glitches.flatten(), **hist_kwargs)
hist_kwargs = {'histtype':'step',
               'density':False,
               'log':True,
               'bins':edges}
counts, edges, patches = plt.hist(snr2_per_template.flatten(), **hist_kwargs, alpha=0.5)
# Density histograms of the loudest template per window, vetoed vs raw.
hist_kwargs = {'histtype':'step',
               'density':True,
               'log':True,
               'bins':200}
counts, edges, patches = plt.hist(snr2_per_template.max(axis=0), **hist_kwargs, alpha=0.5)
hist_kwargs = {'histtype':'step',
               'density':True,
               'log':True,
               'bins':200}
counts, edges, patches = plt.hist(snr2_per_template_with_glitches.max(axis=0).flatten(), **hist_kwargs, ls='--')
j = snr2_per_template.argmax(axis=0) # index of the loudest template per 0.1-s window
print(j)
[ 74 16 32 ... 0 64 132]
# loudest SNR^2 across the bank vs time
plt.plot(np.linspace(0, 2048, snr2_per_template.shape[1]), snr2_per_template.max(axis=0))
[<matplotlib.lines.Line2D at 0x52f5ad930>]
# (template, time) location of the global SNR^2 maximum, and its masses
best_template_index, best_timestamp_index = np.unravel_index(snr2_per_template.argmax(), snr2_per_template.shape)
m1[best_template_index], m2[best_template_index]
(1.425534692233721, 1.26698193531312)
$p$ = probability that none of $N = N_{\rm templates} \times N_{\rm times}$ individual experiments will reach value $x$ or higher: $$p = \left(1 - SF(x)\right)^N = CDF(x)^N$$
Probability that at least one of $N$ experiments will reach value $x$: $1 - p = 1 - \left(1 - SF(x)\right)^N$
At this high SNR^2, the FAR is almost exactly zero
N_templates = bank_coordinates.shape[0]
# trials factor: number of (template, time-sample) pairs searched
N_trials = N_templates * len(times)
# NOTE(review): SF(x)**N is the probability that ALL N trials exceed x;
# the probability that none exceeds x is CDF(x)**N = (1 - SF(x))**N -- confirm
# which quantity is intended here.
stats.chi2(df=2).sf(snr2_per_template.max())**N_trials
0.0
1.0
# log-probability that a single trial stays below the loudest SNR^2.
# BUG FIX: `df` was an undefined name; the overlap statistic has 2 d.o.f.
np.log(1- stats.chi2(df=2).sf(snr2_per_template.max()))
def FAR(x, N):
    """False-alarm rate: probability that at least one of N independent
    chi^2(2 d.o.f.) trials reaches value x or higher.

    Parameters: x (float or array) -- SNR^2 threshold; N (int) -- trials factor.
    Returns the same shape as x.
    """
    dist = stats.chi2(df=2)
    # BUG FIX (numerical): 1 - cdf(x)**N underflows to 0 once sf(x) is below
    # float precision; log1p/expm1 keep the result accurate for tiny sf.
    return -np.expm1(N * np.log1p(-dist.sf(x)))
# Compare log-FAR curves: the N*SF(x) union-bound approximation (derived in
# the markdown below) against the exact FAR.
# BUG FIX: `log_FAR` was an undefined name; use the log of the N*SF(x)
# approximation, which stays finite where 1 - CDF(x)**N underflows to zero.
x = np.linspace(0.01, 100, 1000)
plt.plot(x, np.log(snr2_per_template.size * stats.chi2(df=2).sf(x)))
plt.plot(x, np.log(FAR(x, snr2_per_template.size)), ls='--')
Use the binomial (union-bound) approximation to improve the calculation:
$$ FAR = 1 - CDF(x)^N = 1 - (1-SF(x))^N \approx 1 - 1 + N\cdot SF(x) = N\cdot SF(x) $$
# union-bound/binomial approximation: FAR ~ N * SF(x) for small SF
snr2_per_template.size * stats.chi2(df=2).sf(snr2_per_template.max())
6.9261657345906676e-65
# create the spectrogram in 2 steps, so the dynamic range (vmax) of the second
# call can be calibrated from the first call's output
specgram_kwargs = {'x': np.fft.irfft(strain_f*whitening_filter),
                   'NFFT': int(fs*0.5),
                   'noverlap':int(fs*0.25),
                   'scale': 'linear',
                   'vmin':0,
                   'vmax':25,
                   'Fs':fs}
o = plt.specgram(**specgram_kwargs)
# second pass: vmax = 5 standard deviations of the in-band (20-1000 Hz) values
specgram_kwargs = {'x': np.fft.irfft(strain_f*whitening_filter),
                   'NFFT': int(fs*0.5),
                   'noverlap':int(fs*0.25),
                   'scale': 'linear',
                   'vmin':0,
                   'vmax':o[0][(o[1]>20)*(o[1]<1000)].std()*5,
                   'Fs':fs}
o = plt.specgram(**specgram_kwargs)
# zoom in around the candidate time: window index 10259 of the 0.1-s grid
tmin = 0.1 * 10259 - 6
tmax = 0.1 * 10259 + 1
# NOTE(review): this re-binds `fmin` (it was the analysis band edge above),
# and fmin/fmax are not actually used by the ylim call below
fmin = 20
fmax = 1000
plt.xlim(tmin, tmax)
plt.ylim(20, 1000)
(20.0, 1000.0)
# SNR^2 per template (rows) vs time window (columns), after the glitch veto
plt.figure(figsize=(20, 10))
plt.imshow(snr2_per_template, aspect='auto', vmin=0, vmax=25)
<matplotlib.image.AxesImage at 0x52f7211e0>
# summary statistics of the vetoed SNR^2 map
snr2_per_template.mean(), snr2_per_template.max(), snr2_per_template.std()
(9.41063405590657, 308.9003629602669, 3.6211307749455677)
# the same map without the glitch veto, for comparison
plt.figure(figsize=(20, 10))
plt.imshow(snr2_per_template_with_glitches, aspect='auto', vmin=0, vmax=25)
<matplotlib.image.AxesImage at 0x30ef61e70>
hist_kwargs = {'histtype':'step',
               'density':True,
               'log':True,
               'bins':200}
# BUG FIX: `snr2_before_veto` / `snr2_after_veto` were never defined
# (NameError); use the per-template SNR^2 arrays built after the search loop
# (raw vs glitch-vetoed).
plt.hist(snr2_per_template_with_glitches.flatten(), **hist_kwargs);
plt.hist(snr2_per_template.flatten(), **hist_kwargs, ls='--');
--------------------------------------------------------------------------- NameError Traceback (most recent call last) Cell In[153], line 5 1 hist_kwargs = {'histtype':'step', 2 'density':True, 3 'log':True, 4 'bins':200} ----> 5 plt.hist(snr2_before_veto.flatten(), **hist_kwargs); 6 plt.hist(snr2_after_veto.flatten(), **hist_kwargs, ls='--'); NameError: name 'snr2_before_veto' is not defined
hist_kwargs = {'histtype':'step',
               'density':True,
               'log':True,
               'bins':range(60)}
# BUG FIX: `snr2_before_veto` / `snr2_after_veto` were never defined
# (NameError); histogram the per-window maxima of the raw and glitch-vetoed
# per-template SNR^2 arrays instead.
plt.hist(snr2_per_template_with_glitches.max(axis=0), **hist_kwargs);
plt.hist(snr2_per_template.max(axis=0), **hist_kwargs, ls='--');
--------------------------------------------------------------------------- NameError Traceback (most recent call last) Cell In[154], line 5 1 hist_kwargs = {'histtype':'step', 2 'density':True, 3 'log':True, 4 'bins':range(60)} ----> 5 plt.hist(snr2_before_veto.max(axis=0), **hist_kwargs); 6 plt.hist(snr2_after_veto.max(axis=0), **hist_kwargs, ls='--'); NameError: name 'snr2_before_veto' is not defined
# BUG FIX: `snr2_before_veto` was never defined (NameError); plot the
# per-window maxima of the raw (pre-veto) per-template SNR^2.
plt.plot(snr2_per_template_with_glitches.max(axis=0))
[<matplotlib.lines.Line2D at 0x2e36f7a00>]